In [1]:
import fastai
from fastai.vision import *
from fastai.callbacks import *
from fastai.utils.mem import *

from torchvision.models import vgg16_bn
In [2]:
folder = 'photo'
file = 'download.csv'
In [3]:
path = Path('data')
dest = path/folder
dest.mkdir(parents=True, exist_ok=True)
In [4]:
download_images(path/file, dest)
100.00% [601/601 01:42<00:00]
Error https://gb.fotolibra.com/images/larger-thumbnails/105084-stained-glass-st-vitus-cathedral-prague.jpeg HTTPSConnectionPool(host='gb.fotolibra.com', port=443): Max retries exceeded with url: /images/larger-thumbnails/105084-stained-glass-st-vitus-cathedral-prague.jpeg (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', 'tls_process_server_certificate', 'certificate verify failed')])")))
Error https://gb.fotolibra.com/images/larger-thumbnails/182830-stained-glass-window-in-st-vitus-cathedr.jpeg HTTPSConnectionPool(host='gb.fotolibra.com', port=443): Max retries exceeded with url: /images/larger-thumbnails/182830-stained-glass-window-in-st-vitus-cathedr.jpeg (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', 'tls_process_server_certificate', 'certificate verify failed')])")))
Error https://gb.fotolibra.com/images/previews/182828-stained-glass-window-in-st-vitus-cathedral-prague.jpeg HTTPSConnectionPool(host='gb.fotolibra.com', port=443): Max retries exceeded with url: /images/previews/182828-stained-glass-window-in-st-vitus-cathedral-prague.jpeg (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', 'tls_process_server_certificate', 'certificate verify failed')])")))
Error https://previews.123rf.com/images/cascoly2/cascoly21609/cascoly2160900026/63345467-prague-sep-1-2016-stained-glass-window-in-st-vitus-cathedral-prague-czech-republic.jpg HTTPSConnectionPool(host='previews.123rf.com', port=443): Max retries exceeded with url: /images/cascoly2/cascoly21609/cascoly2160900026/63345467-prague-sep-1-2016-stained-glass-window-in-st-vitus-cathedral-prague-czech-republic.jpg (Caused by NewConnectionError('<urllib3.connection.VerifiedHTTPSConnection object at 0x7f38986a59e8>: Failed to establish a new connection: [Errno -2] Name or service not known'))
In [5]:
verify_images(path/folder, delete=True)
100.00% [595/595 00:09<00:00]
cannot identify image file <_io.BufferedReader name='data/photo/00000145.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000547.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000505.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000506.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000445.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000483.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000129.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000155.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000367.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000118.jpg'>
cannot identify image file <_io.BufferedReader name='data/photo/00000491.jpg'>
In [5]:
path_hr = path/'photo'
path_lr = path/'crappy'
In [8]:
from PIL import Image, ImageDraw, ImageFont

class crappifier(object):
    def __init__(self, path_lr, path_hr):
        self.path_lr = path_lr
        self.path_hr = path_hr              
        
    def __call__(self, fn, i):       
        dest = self.path_lr/fn.relative_to(self.path_hr)    
        dest.parent.mkdir(parents=True, exist_ok=True)
        img = PIL.Image.open(fn)
        targ_sz = resize_to(img, 96, use_min=True)
        img = img.resize(targ_sz, resample=PIL.Image.BILINEAR).convert('RGB')
        w,h = img.size
        q = random.randint(10,70)
        fnt0 = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 30)
        fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 10)
        ImageDraw.Draw(img).text((random.randint(0,w//6),random.randint(0,h//6)), str("00:28"), fill=(255,255,255), font=fnt0)
        ImageDraw.Draw(img).text((random.randint(0,w//2),random.randint(0,h//2)), str("CMHK"), fill=(255,255,255), font=fnt)
        ImageDraw.Draw(img).text((random.randint(w//2, w),random.randint(0,h//2)), str("99%"), fill=(255,255,255), font=fnt)
        ImageDraw.Draw(img).rectangle((70, 5, 90, 10), fill=(255,209,0))
        #img.save(dest, quality=q)
        img.save(dest)
In [9]:
il = ImageList.from_folder(path_hr)
parallel(crappifier(path_lr, path_hr), il.items)
100.00% [584/584 00:03<00:00]
In [6]:
bs,size=32, 128
# bs,size = 24,160
#bs,size = 8,256
arch = models.resnet34
In [7]:
arch = models.resnet34
src = ImageImageList.from_folder(path_lr).split_by_rand_pct(0.1, seed=42)
In [8]:
def get_data(bs,size):
    data = (src.label_from_func(lambda x: path_hr/x.name)
           .transform(resize_method=ResizeMethod.SQUISH, size=size, tfm_y=True)
           .databunch(bs=bs).normalize(imagenet_stats, do_y=True))

    data.c = 3
    return data
In [9]:
data_gen = get_data(bs,size)
In [10]:
data_gen.show_batch()
In [11]:
# a matrix that contains every dot product of flattened activations, basically a correlation
#the Gram matrix then is this kind of map — the diagonal is perhaps the most interesting. 
#The diagonal is which channels are the most active and then the off diagonal is which channels tend to appear together. 
#And overall, if two pictures have the same style, 
#then we are expecting that some layer of activations, they will have similar Gram matrices

# dot product would be high when grid cells that have texture also have diagonal
def gram_matrix(x):
    n,c,h,w = x.size()
    x = x.view(n, c, -1)
    return (x @ x.transpose(1,2))/(c*h*w)
In [12]:
base_loss = F.l1_loss
In [13]:
vgg_m = vgg16_bn(True).features.cuda().eval()
requires_grad(vgg_m, False)
In [14]:
blocks = [i-1 for i,o in enumerate(children(vgg_m)) if isinstance(o,nn.MaxPool2d)]
blocks, [vgg_m[i] for i in blocks]
Out[14]:
([5, 12, 22, 32, 42],
 [ReLU(inplace), ReLU(inplace), ReLU(inplace), ReLU(inplace), ReLU(inplace)])
In [15]:
class FeatureLoss(nn.Module):
    def __init__(self, m_feat, layer_ids, layer_wgts):
        super().__init__()
        self.m_feat = m_feat
        self.loss_features = [self.m_feat[i] for i in layer_ids]
        self.hooks = hook_outputs(self.loss_features, detach=False)
        self.wgts = layer_wgts
        self.metric_names = ['pixel',] + [f'feat_{i}' for i in range(len(layer_ids))
              ] + [f'gram_{i}' for i in range(len(layer_ids))]

    def make_features(self, x, clone=False):
        self.m_feat(x)
        return [(o.clone() if clone else o) for o in self.hooks.stored]
    
    def forward(self, input, target):
        out_feat = self.make_features(target, clone=True)
        in_feat = self.make_features(input)
        self.feat_losses = [base_loss(input,target)]
        self.feat_losses += [base_loss(f_in, f_out)*w
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.feat_losses += [base_loss(gram_matrix(f_in), gram_matrix(f_out))*w**2 * 5e3
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.metrics = dict(zip(self.metric_names, self.feat_losses))
        return sum(self.feat_losses)
    
    def __del__(self): self.hooks.remove()
In [16]:
feat_loss = FeatureLoss(vgg_m, blocks[2:5], [5,15,2])
In [17]:
wd = 1e-3
learn = unet_learner(data_gen, arch, wd=wd, loss_func=feat_loss, callback_fns=LossMetrics,
                     blur=True, norm_type=NormType.Weight)
gc.collect();
In [21]:
learn.lr_find()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
In [22]:
learn.recorder.plot(suggestion=True)
Min numerical gradient: 1.45E-03
In [21]:
lr = 1e-3

def do_fit(save_name, lrs=slice(lr), pct_start=0.9):
    learn.fit_one_cycle(10, lrs, pct_start=pct_start)
    learn.save(save_name)
    learn.show_results(rows=1, imgsize=5)
In [24]:
do_fit('1a', slice(lr*10))
epoch train_loss valid_loss pixel feat_0 feat_1 feat_2 gram_0 gram_1 gram_2 time
0 3.979961 3.454767 0.449164 0.294864 0.346404 0.146217 0.816305 1.151782 0.250031 00:18
1 3.532618 3.088226 0.385050 0.270098 0.319301 0.134761 0.698697 1.048687 0.231632 00:15
2 3.254141 2.790255 0.353427 0.252915 0.295081 0.122318 0.604426 0.946567 0.215520 00:15
3 3.059705 2.787318 0.386410 0.253286 0.287012 0.120033 0.593562 0.938657 0.208359 00:14
4 2.938903 2.715500 0.346835 0.246792 0.285105 0.119583 0.576992 0.926960 0.213232 00:16
5 2.839258 2.620494 0.339174 0.241482 0.278088 0.113893 0.546710 0.897185 0.203963 00:15
6 2.787239 2.632721 0.361974 0.245888 0.282570 0.116879 0.526932 0.891364 0.207114 00:14
7 2.806757 2.857207 0.414379 0.257862 0.297477 0.125252 0.597156 0.945089 0.219992 00:15
8 2.774835 2.528598 0.372357 0.238711 0.269902 0.111034 0.493080 0.846520 0.196993 00:17
9 2.674979 2.376656 0.317726 0.232173 0.261745 0.106634 0.460731 0.808302 0.189345 00:15
In [25]:
learn.unfreeze()
In [26]:
do_fit('1b', slice(1e-5,lr))
epoch train_loss valid_loss pixel feat_0 feat_1 feat_2 gram_0 gram_1 gram_2 time
0 2.347437 2.341745 0.313910 0.230063 0.258815 0.105268 0.449253 0.797030 0.187406 00:15
1 2.334070 2.328634 0.309317 0.228850 0.257486 0.104668 0.449084 0.792634 0.186596 00:15
2 2.319361 2.308834 0.307418 0.227556 0.255543 0.103870 0.445676 0.783033 0.185739 00:15
3 2.304350 2.280955 0.305581 0.225780 0.253007 0.102725 0.437748 0.772667 0.183447 00:16
4 2.287526 2.255143 0.301954 0.224581 0.250914 0.101685 0.433905 0.760140 0.181964 00:16
5 2.271015 2.243234 0.301579 0.222987 0.250011 0.101716 0.426367 0.758990 0.181585 00:15
6 2.250124 2.219876 0.298613 0.221971 0.247497 0.099887 0.422784 0.750307 0.178817 00:15
7 2.231988 2.233363 0.301074 0.221676 0.248297 0.100261 0.421927 0.760780 0.179347 00:16
8 2.206112 2.180260 0.296071 0.219158 0.244015 0.097976 0.413347 0.734439 0.175255 00:16
9 2.175730 2.155706 0.292356 0.218134 0.242277 0.097367 0.407164 0.724317 0.174091 00:15
In [18]:
del data_gen 
In [25]:
data = get_data(6,size*2)
In [26]:
learn.data = data

learn.freeze()
gc.collect()
Out[26]:
92
In [27]:
learn.load('1b');
/home/bf/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/serialization.py:256: UserWarning: Couldn't retrieve source code for container of type FeatureLoss. It won't be checked for correctness upon loading.
  "type " + obj.__name__ + ". It won't be checked "
In [28]:
do_fit('2a')
epoch train_loss valid_loss pixel feat_0 feat_1 feat_2 gram_0 gram_1 gram_2 time
0 2.315125 2.231266 0.369934 0.285513 0.299933 0.109743 0.488053 0.554668 0.123423 01:00
1 2.227990 2.147103 0.370932 0.280855 0.291260 0.104790 0.447575 0.533336 0.118354 00:52
2 2.182577 2.093325 0.372941 0.278849 0.286722 0.103712 0.421964 0.512536 0.116600 00:52
3 2.131435 2.076810 0.370009 0.276538 0.283674 0.102311 0.411734 0.517410 0.115134 00:52
4 2.105607 2.087737 0.379002 0.276452 0.284413 0.103019 0.404947 0.525137 0.114768 00:52
5 2.078314 2.050207 0.369115 0.274189 0.281626 0.102807 0.395314 0.512939 0.114217 00:52
6 2.060336 2.025758 0.365841 0.272653 0.278455 0.101122 0.387638 0.506459 0.113592 00:52
7 2.068998 2.037755 0.369955 0.272075 0.279203 0.101922 0.388984 0.511378 0.114238 00:52
8 2.028965 2.001324 0.362277 0.269654 0.277381 0.100720 0.379864 0.498295 0.113133 00:53
9 1.970103 1.949844 0.358193 0.267996 0.272814 0.099686 0.364466 0.475102 0.111587 00:52
In [29]:
learn.unfreeze()
In [30]:
do_fit('2b', slice(1e-6,1e-4), pct_start=0.3)
epoch train_loss valid_loss pixel feat_0 feat_1 feat_2 gram_0 gram_1 gram_2 time
0 1.914766 1.931704 0.361219 0.267916 0.271677 0.099211 0.355069 0.465740 0.110871 00:55
1 1.908721 1.925010 0.361571 0.267511 0.270685 0.098852 0.353049 0.462652 0.110690 00:55
2 1.904668 1.918424 0.362229 0.267231 0.270786 0.099073 0.350838 0.457506 0.110760 00:55
3 1.899696 1.913786 0.359186 0.267467 0.270624 0.099061 0.350828 0.455822 0.110799 00:55
4 1.895382 1.912751 0.357119 0.267089 0.270499 0.099121 0.352558 0.455618 0.110747 00:55
5 1.873544 1.908435 0.359584 0.267428 0.270159 0.098896 0.347274 0.454719 0.110375 00:56
6 1.866762 1.904210 0.359842 0.266839 0.269933 0.098850 0.346023 0.452547 0.110175 00:55
7 1.855549 1.905028 0.357626 0.267130 0.269941 0.098876 0.347340 0.453931 0.110183 00:55
8 1.857902 1.902777 0.358088 0.266868 0.269744 0.098799 0.346742 0.452468 0.110069 00:55
9 1.861840 1.902536 0.358017 0.266940 0.269817 0.098821 0.346352 0.452493 0.110096 00:55
In [37]:
#learn = None
#gc.collect();
In [32]:
256/320*1024
Out[32]:
819.2
In [33]:
256/320*1600
Out[33]:
1280.0
In [34]:
free = gpu_mem_get_free_no_cache()
# the max size of the test image depends on the available GPU RAM 
if free > 8000: size=(1280, 1600) # >  8GB RAM
else:           size=( 820, 1024) # <= 8GB RAM
print(f"using size={size}, have {free}MB of GPU RAM free")
using size=(820, 1024), have 5304MB of GPU RAM free
In [55]:
#learn = unet_learner(data, arch, loss_func=F.l1_loss, blur=True, norm_type=NormType.Weight)
#data = (src.label_from_func(lambda x: path_hr/x.name)
#       .transform(resize_method=ResizeMethod.SQUISH, tfm_y=True)
#       .databunch(bs=bs).normalize(imagenet_stats, do_y=True))
#data.c = 3
In [56]:
#learn.load('2b');
In [57]:
#img = open_image('testing.jpg'); img.shape
In [58]:
#p,img_hr,b = learn.predict(img)
In [59]:
#show_image(img, figsize=(18,15), interpolation='nearest');
In [60]:
#show_image(img_hr, figsize=(18,15), interpolation='nearest');
In [21]:
size*3
Out[21]:
384
In [20]:
del data_gen
gc.collect();

data = get_data(3,size*3)
learn.data = data
learn.load('2b');
/home/bf/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/serialization.py:256: UserWarning: Couldn't retrieve source code for container of type FeatureLoss. It won't be checked for correctness upon loading.
  "type " + obj.__name__ + ". It won't be checked "
In [20]:
do_fit('3a')
epoch train_loss valid_loss pixel feat_0 feat_1 feat_2 gram_0 gram_1 gram_2 time
0 2.007085 1.943306 0.384215 0.295743 0.291746 0.099862 0.381943 0.396682 0.093116 02:08
1 1.960003 1.929348 0.388330 0.293992 0.289998 0.098863 0.372641 0.393304 0.092219 01:59
2 1.961986 1.928157 0.380067 0.293422 0.289184 0.097640 0.377289 0.399249 0.091306 01:59
3 1.960278 1.938557 0.380762 0.293561 0.288548 0.095934 0.379748 0.407193 0.092810 01:59
4 1.973769 1.952396 0.389198 0.292313 0.290525 0.098659 0.373920 0.416172 0.091607 01:59
5 1.962255 1.915787 0.384615 0.291485 0.286550 0.097480 0.364472 0.400313 0.090873 01:59
6 1.959283 1.948041 0.378028 0.293675 0.289556 0.096639 0.382106 0.416839 0.091199 01:59
7 1.947915 1.956720 0.405519 0.292487 0.288059 0.097771 0.364847 0.416683 0.091354 01:59
8 1.955289 1.922443 0.374685 0.291893 0.285877 0.096817 0.365138 0.417502 0.090532 01:59
9 1.861625 1.848883 0.374304 0.288082 0.281172 0.096166 0.339215 0.381133 0.088811 02:00
In [21]:
learn.unfreeze()
In [22]:
do_fit('3b', slice(1e-6,1e-4), pct_start=0.3)
epoch train_loss valid_loss pixel feat_0 feat_1 feat_2 gram_0 gram_1 gram_2 time
0 1.812646 1.838282 0.374144 0.288141 0.281318 0.096668 0.332768 0.376762 0.088481 02:05
1 1.840341 1.832671 0.375252 0.287373 0.280575 0.096412 0.329527 0.375133 0.088400 02:05
2 1.808424 1.831468 0.373603 0.287739 0.280922 0.096886 0.330843 0.373364 0.088111 02:05
3 1.834574 1.824737 0.374371 0.287031 0.280663 0.097197 0.326665 0.370763 0.088047 02:05
4 1.805104 1.821196 0.371832 0.286454 0.279648 0.096587 0.328972 0.369535 0.088170 02:05
5 1.781077 1.818483 0.374570 0.286928 0.279833 0.096619 0.324422 0.368396 0.087714 02:05
6 1.771750 1.814497 0.372621 0.286892 0.279286 0.096375 0.324633 0.367056 0.087634 02:05
7 1.784114 1.812361 0.372675 0.286823 0.279707 0.096279 0.323536 0.365700 0.087642 02:05
8 1.787838 1.811742 0.372843 0.286605 0.279459 0.096491 0.322904 0.365910 0.087530 02:05
9 1.773693 1.810832 0.372061 0.286601 0.279398 0.096251 0.323505 0.365422 0.087594 02:05
In [21]:
learn.load('3b')
Out[21]:
Learner(data=ImageDataBunch;

Train: LabelList (526 items)
x: ImageImageList
Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384)
y: ImageList
Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384)
Path: data/crappy;

Valid: LabelList (58 items)
x: ImageImageList
Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384)
y: ImageList
Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384),Image (3, 384, 384)
Path: data/crappy;

Test: None, model=DynamicUnet(
  (layers): ModuleList(
    (0): Sequential(
      (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
      (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU(inplace)
      (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
      (4): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (1): BasicBlock(
          (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (2): BasicBlock(
          (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (5): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (downsample): Sequential(
            (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
        )
        (1): BasicBlock(
          (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (2): BasicBlock(
          (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (3): BasicBlock(
          (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (6): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (downsample): Sequential(
            (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
        )
        (1): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (2): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (3): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (4): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (5): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (7): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (downsample): Sequential(
            (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
        )
        (1): BasicBlock(
          (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (2): BasicBlock(
          (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace)
          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
    )
    (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
    (3): Sequential(
      (0): Sequential(
        (0): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (1): Sequential(
        (0): Conv2d(1024, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
    )
    (4): UnetBlock(
      (shuf): PixelShuffle_ICNR(
        (conv): Sequential(
          (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1))
        )
        (shuf): PixelShuffle(upscale_factor=2)
        (pad): ReplicationPad2d((1, 0, 1, 0))
        (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)
        (relu): ReLU(inplace)
      )
      (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Sequential(
        (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (conv2): Sequential(
        (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (relu): ReLU()
    )
    (5): UnetBlock(
      (shuf): PixelShuffle_ICNR(
        (conv): Sequential(
          (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1))
        )
        (shuf): PixelShuffle(upscale_factor=2)
        (pad): ReplicationPad2d((1, 0, 1, 0))
        (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)
        (relu): ReLU(inplace)
      )
      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Sequential(
        (0): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (conv2): Sequential(
        (0): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (relu): ReLU()
    )
    (6): UnetBlock(
      (shuf): PixelShuffle_ICNR(
        (conv): Sequential(
          (0): Conv2d(384, 768, kernel_size=(1, 1), stride=(1, 1))
        )
        (shuf): PixelShuffle(upscale_factor=2)
        (pad): ReplicationPad2d((1, 0, 1, 0))
        (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)
        (relu): ReLU(inplace)
      )
      (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Sequential(
        (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (conv2): Sequential(
        (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (relu): ReLU()
    )
    (7): UnetBlock(
      (shuf): PixelShuffle_ICNR(
        (conv): Sequential(
          (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1))
        )
        (shuf): PixelShuffle(upscale_factor=2)
        (pad): ReplicationPad2d((1, 0, 1, 0))
        (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)
        (relu): ReLU(inplace)
      )
      (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv1): Sequential(
        (0): Conv2d(192, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (conv2): Sequential(
        (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU(inplace)
      )
      (relu): ReLU()
    )
    (8): PixelShuffle_ICNR(
      (conv): Sequential(
        (0): Conv2d(96, 384, kernel_size=(1, 1), stride=(1, 1))
      )
      (shuf): PixelShuffle(upscale_factor=2)
      (pad): ReplicationPad2d((1, 0, 1, 0))
      (blur): AvgPool2d(kernel_size=2, stride=1, padding=0)
      (relu): ReLU(inplace)
    )
    (9): MergeLayer()
    (10): SequentialEx(
      (layers): ModuleList(
        (0): Sequential(
          (0): Conv2d(99, 99, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (1): ReLU(inplace)
        )
        (1): Sequential(
          (0): Conv2d(99, 99, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (1): ReLU(inplace)
        )
        (2): MergeLayer()
      )
    )
    (11): Sequential(
      (0): Conv2d(99, 3, kernel_size=(1, 1), stride=(1, 1))
    )
  )
), opt_func=functools.partial(<class 'torch.optim.adam.Adam'>, betas=(0.9, 0.99)), loss_func=FeatureLoss(
  (m_feat): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU(inplace)
    (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (5): ReLU(inplace)
    (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (7): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (9): ReLU(inplace)
    (10): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (12): ReLU(inplace)
    (13): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (14): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (16): ReLU(inplace)
    (17): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (19): ReLU(inplace)
    (20): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (21): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (22): ReLU(inplace)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (26): ReLU(inplace)
    (27): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (28): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (29): ReLU(inplace)
    (30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (31): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (32): ReLU(inplace)
    (33): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (35): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (36): ReLU(inplace)
    (37): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (38): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (39): ReLU(inplace)
    (40): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (41): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (42): ReLU(inplace)
    (43): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
), metrics=[], true_wd=True, bn_wd=True, wd=0.001, train_bn=True, path=PosixPath('data/crappy'), model_dir='models', callback_fns=[functools.partial(<class 'fastai.basic_train.Recorder'>, add_time=True, silent=False), <class 'fastai.callbacks.loss_metrics.LossMetrics'>], callbacks=[], layer_groups=[Sequential(
  (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
  (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (2): ReLU(inplace)
  (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  (4): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (5): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (6): ReLU(inplace)
  (7): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (8): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (9): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (10): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (11): ReLU(inplace)
  (12): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (13): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (14): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (15): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (16): ReLU(inplace)
  (17): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (18): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (19): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
  (20): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (21): ReLU(inplace)
  (22): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (23): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (24): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
  (25): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (26): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (27): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (28): ReLU(inplace)
  (29): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (30): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (31): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (32): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (33): ReLU(inplace)
  (34): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (35): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (36): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (37): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (38): ReLU(inplace)
  (39): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (40): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
), Sequential(
  (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
  (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (2): ReLU(inplace)
  (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (5): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
  (6): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (7): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (8): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (9): ReLU(inplace)
  (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (11): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (13): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (14): ReLU(inplace)
  (15): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (16): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (17): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (18): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (19): ReLU(inplace)
  (20): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (21): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (22): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (23): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (24): ReLU(inplace)
  (25): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (26): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (27): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (28): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (29): ReLU(inplace)
  (30): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (31): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (32): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
  (33): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (34): ReLU(inplace)
  (35): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (36): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (37): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
  (38): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (39): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (40): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (41): ReLU(inplace)
  (42): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (43): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (44): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (45): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (46): ReLU(inplace)
  (47): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (48): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
), Sequential(
  (0): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (1): ReLU()
  (2): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (3): ReLU(inplace)
  (4): Conv2d(1024, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (5): ReLU(inplace)
  (6): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1))
  (7): PixelShuffle(upscale_factor=2)
  (8): ReplicationPad2d((1, 0, 1, 0))
  (9): AvgPool2d(kernel_size=2, stride=1, padding=0)
  (10): ReLU(inplace)
  (11): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (12): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (13): ReLU(inplace)
  (14): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (15): ReLU(inplace)
  (16): ReLU()
  (17): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1))
  (18): PixelShuffle(upscale_factor=2)
  (19): ReplicationPad2d((1, 0, 1, 0))
  (20): AvgPool2d(kernel_size=2, stride=1, padding=0)
  (21): ReLU(inplace)
  (22): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (23): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (24): ReLU(inplace)
  (25): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (26): ReLU(inplace)
  (27): ReLU()
  (28): Conv2d(384, 768, kernel_size=(1, 1), stride=(1, 1))
  (29): PixelShuffle(upscale_factor=2)
  (30): ReplicationPad2d((1, 0, 1, 0))
  (31): AvgPool2d(kernel_size=2, stride=1, padding=0)
  (32): ReLU(inplace)
  (33): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (34): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (35): ReLU(inplace)
  (36): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (37): ReLU(inplace)
  (38): ReLU()
  (39): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1))
  (40): PixelShuffle(upscale_factor=2)
  (41): ReplicationPad2d((1, 0, 1, 0))
  (42): AvgPool2d(kernel_size=2, stride=1, padding=0)
  (43): ReLU(inplace)
  (44): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (45): Conv2d(192, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (46): ReLU(inplace)
  (47): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (48): ReLU(inplace)
  (49): ReLU()
  (50): Conv2d(96, 384, kernel_size=(1, 1), stride=(1, 1))
  (51): PixelShuffle(upscale_factor=2)
  (52): ReplicationPad2d((1, 0, 1, 0))
  (53): AvgPool2d(kernel_size=2, stride=1, padding=0)
  (54): ReLU(inplace)
  (55): MergeLayer()
  (56): Conv2d(99, 99, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (57): ReLU(inplace)
  (58): Conv2d(99, 99, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (59): ReLU(inplace)
  (60): MergeLayer()
  (61): Conv2d(99, 3, kernel_size=(1, 1), stride=(1, 1))
)], add_time=True, silent=None)
In [22]:
fn = data.train_ds.x.items[0]; fn
img = open_image(fn); img.shape
p,img_hr,b = learn.predict(img)
show_image(img, figsize=(18,15), interpolation='nearest');
In [23]:
show_image(img_hr, figsize=(18,15), interpolation='nearest');
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
In [24]:
def get_test_data(bs):
    data = (src.label_from_func(lambda x: path_hr/x.name)
           .transform(resize_method=ResizeMethod.SQUISH, size=(1000) ,tfm_y=True)
           .databunch(bs=bs).normalize(imagenet_stats, do_y=True))

    data.c = 3
    return data
In [25]:
data = get_test_data(1)
In [26]:
learn.data = data
In [27]:
img = open_image('testing.jpg'); img.shape
p,img_hr,b = learn.predict(img)
In [28]:
show_image(img, figsize=(18,15), interpolation='nearest');
In [29]:
show_image(Image(img_hr).resize((3, 1280, 719)), figsize=(18,15), interpolation='nearest')
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Out[29]:
<matplotlib.axes._subplots.AxesSubplot at 0x7efbc58e8c88>
In [ ]: